---
title: "Meta-analysis of avian responses and butterfly eyespots"
author: "**Mizuno et al.**"
date: "`r Sys.Date()`"
format:
html:
toc: true
toc-location: left
toc-depth: 3
toc-float: true
toc-title: "**Contents**"
output-file: "index.html"
embed-resources: true
code-fold: true
code-tools: true
number-sections: true
#bibliography: ./bib/ref.bib
fontsize: "12"
max-width: "10"
code-overflow: wrap
df_print: paged
theme: minty
code-line-numbers: false
crossref:
fig-title: Figure # (default is "Figure")
tbl-title: Table # (default is "Table")
title-delim: — # (default is ":")
fig-prefix: Fig. # (default is "Figure")
tbl-prefix: Table. # (default is "Table")
editor_options:
chunk_output_type: console
execute:
warning: false
message: false
tidy: true
#cache: true
---
## Setting up
Load packages and a custom function.
The custom function calculates the effect size (lnRR) and its variance from raw data.
```{r}
# Load (and install if missing) every package used in this document.
pacman::p_load(
  ape,         # phylogenetic trees (read.nexus, vcv)
  DT,          # interactive tables (datatable)
  ggokabeito,  # Okabe-Ito colour scales
  ggcorrplot,  # correlation heatmaps
  here,        # project-relative file paths
  knitr,
  tidyverse,
  patchwork,   # composing ggplots
  metafor,     # escalc / rma.mv / vcalc
  miWQS,       # pool.mi()
  orchaRd,     # orchard_plot / bubble_plot / i2_ml / r2_ml
  parallel,    # mclapply()
  uncoveR      # assocMatrix()
)
# function for calculating effect size (lnRR)
# Compute the log response ratio (lnRR) and its sampling variance for every
# row of a raw-data table.
#
# Handles three response types ("continuous", "proportion1" = proportions
# without SDs, "proportion2" = proportions with SDs) crossed with two study
# designs ("independent", "dependent"); rows with any other combination are
# left with NA effect sizes, as in the original implementation.
#
# Expects columns: Tn, Cn, T_mean, C_mean, T_sd, C_sd, T_proportion,
# C_proportion, Response, Study_design, Direction.
# Returns the input data with lnRR and lnRR_var columns appended.
effect_lnRR <- function(dt) {
  # Arcsine-square-root transformation for proportions.
  # (Defined once here instead of being re-created inside every loop branch.)
  asin_trans <- function(proportion) {
    asin(sqrt(proportion))
  }
  # Replace zeros (and overly extreme proportions) with small minimum values
  # so that log-ratios and variances stay finite.  Constants are the dataset
  # minima used by the original analysis — do not change without re-checking.
  dt <- dt %>%
    mutate(
      C_mean = ifelse(C_mean == 0, 0.04, C_mean),
      T_sd = ifelse(T_sd == 0, 0.01, T_sd),
      C_sd = ifelse(C_sd == 0, 0.05, C_sd),
      C_proportion = ifelse(C_proportion == 0, 0.01, C_proportion),
      T_proportion = ifelse(T_proportion < 0.01, 0.01, T_proportion),
      T_proportion = ifelse(T_proportion == 1, 0.9, T_proportion)
    )
  # Copy with empty effect-size columns, filled row by row below.
  dt1 <- dt %>%
    mutate(lnRR = NA, lnRR_var = NA)
  for (i in seq_len(nrow(dt1))) {
    Tn <- dt1$Tn[i]
    Cn <- dt1$Cn[i]
    T_mean <- dt1$T_mean[i]
    C_mean <- dt1$C_mean[i]
    T_proportion <- dt1$T_proportion[i]
    C_proportion <- dt1$C_proportion[i]
    T_sd <- dt1$T_sd[i]
    C_sd <- dt1$C_sd[i]
    Response <- dt1$Response[i]
    Study_design <- dt1$Study_design[i]
    Direction <- dt1$Direction[i]
    # For "Decrease" responses, flip both proportions so larger values always
    # indicate a stronger response (only proportion rows use these values).
    if (Response %in% c("proportion1", "proportion2") &&
        Direction == "Decrease") {
      T_proportion <- 1 - T_proportion
      C_proportion <- 1 - C_proportion
    }
    if (Response == "continuous" && Study_design == "independent") {
      # Ratio of means for two independent groups (metafor::escalc).
      effect <- escalc(
        measure = "ROM",
        n1i = Tn, n2i = Cn,
        m1i = T_mean, m2i = C_mean,  # fixed: was `m2 =`, relying on partial matching
        sd1i = T_sd, sd2i = C_sd,
        var.names = c("lnRR", "lnRR_var")
      )
      dt1$lnRR[i] <- effect$lnRR
      dt1$lnRR_var[i] <- effect$lnRR_var
    } else if (Response == "continuous" && Study_design == "dependent") {
      # Ratio of means for dependent (change-score) designs; the pre/post
      # correlation is assumed to be 0.5 and the two ns are averaged.
      effect <- escalc(
        measure = "ROMC",
        ni = (Tn + Cn) / 2,
        m1i = T_mean, m2i = C_mean,  # fixed: was `m2 =`, relying on partial matching
        sd1i = T_sd, sd2i = C_sd,
        ri = 0.5,
        var.names = c("lnRR", "lnRR_var")
      )
      dt1$lnRR[i] <- effect$lnRR
      dt1$lnRR_var[i] <- effect$lnRR_var
    } else if (Response == "proportion1" &&
               Study_design %in% c("independent", "dependent")) {
      # Proportion data without SDs: arcsine-transform, then lnRR with a
      # delta-method variance; Var(asin(sqrt(p))) ~= 1/(8n), hence (1/sqrt(8))^2.
      Tp <- asin_trans(T_proportion)
      Cp <- asin_trans(C_proportion)
      var_T <- (1 / sqrt(8))^2 * (1 / (Tp^2 * Tn))
      var_C <- (1 / sqrt(8))^2 * (1 / (Cp^2 * Cn))
      dt1$lnRR[i] <- log(Tp / Cp)
      dt1$lnRR_var[i] <- if (Study_design == "independent") {
        var_T + var_C
      } else {
        # Dependent design: subtract the covariance term (assumed r = 0.5).
        var_T + var_C - 2 * 0.5 * sqrt(var_T) * sqrt(var_C)
      }
    } else if (Response == "proportion2" &&
               Study_design %in% c("independent", "dependent")) {
      # Proportion data with reported SDs.
      # NOTE(review): T_SD/C_SD below equal sd^2 / (4p(1-p)), i.e. already a
      # delta-method *variance* on the arcsine scale, yet they are squared
      # again in var_T/var_C — confirm against the intended derivation.
      # (Computed from the raw proportions, before transformation, as in the
      # original code.)
      T_SD <- T_sd^2 / (4 * T_proportion * (1 - T_proportion))
      C_SD <- C_sd^2 / (4 * C_proportion * (1 - C_proportion))
      Tp <- asin_trans(T_proportion)
      Cp <- asin_trans(C_proportion)
      var_T <- T_SD^2 * (1 / (Tp^2 * Tn))
      var_C <- C_SD^2 * (1 / (Cp^2 * Cn))
      dt1$lnRR[i] <- log(Tp / Cp)
      dt1$lnRR_var[i] <- if (Study_design == "independent") {
        var_T + var_C
      } else {
        # Dependent design: subtract the covariance term (assumed r = 0.5).
        var_T + var_C - 2 * 0.5 * sqrt(var_T) * sqrt(var_C)
      }
    }
  }
  dt1
}
```
## Prepare datasets for analysis
::: {.panel-tabset}
### Read datasets and phylogeny
First, we used the predator dataset to check whether we need to account for phylogeny. Depending on the result, we will decide whether the predator and prey datasets can be combined for the following meta-analyses.
```{r}
#| label: tbl-dataset
#| tbl-cap: "Predator dataset"
# Read the raw predator and combined datasets.
# fileEncoding = "CP932": the CSVs were saved on a Japanese-locale machine.
dat_predator <- read.csv(here("data/predator_22072023.csv"),
                         header = TRUE,  # fixed: was `T` (reassignable alias)
                         fileEncoding = "CP932")
dat_all <- read.csv(here("data/all_15082023.csv"),
                    header = TRUE,
                    fileEncoding = "CP932")
# Interactive, scrollable preview of the predator dataset.
datatable(dat_predator, caption = "Predator datasets",
          options = list(pageLength = 10, scrollX = TRUE))
```
**Table 1 - Predator datasets**
```{r}
#| label: fig-tree
#| fig-cap: "Phylogenetic tree of bird species"
# Read the posterior sample of phylogenetic trees (NEXUS, from BirdTree.org).
trees <- read.nexus(here("data/bird_phy.nex"))
# Plot the first tree; `[[` extracts a single "phylo" object, whereas
# `trees[1]` returns a length-1 "multiPhylo" list.
plot(trees[[1]])
```
### Calculate lnRR and lnRR variance
```{r}
# Compute lnRR and its variance for both datasets.
dat_pred <- effect_lnRR(dat_predator)
dat <- effect_lnRR(dat_all)
# Add an observation-level ID (one per effect size).
dat_pred$Obs_ID <- seq_len(nrow(dat_pred))  # fixed: was 1:nrow() (unsafe on empty data)
dat$Obs_ID <- seq_len(nrow(dat))
# Diameter, area, and background area are right-skewed, so log-transform them.
dat$Log_diameter <- log(dat$Diameter_pattern)
dat$Log_area <- log(dat$Area_pattern)
dat$Log_background <- log(dat$Area_background)
# Variance-covariance matrices for effect sizes that share a cohort.
# NOTE(review): VCV_pred uses `subgroup = Obs_ID` (no rho) while VCV uses
# `obs = Obs_ID, rho = 0.5` — confirm this asymmetry between the two
# datasets is intentional.
VCV_pred <- vcalc(vi = lnRR_var,
                  cluster = Cohort_ID,
                  subgroup = Obs_ID,
                  data = dat_pred)
VCV <- vcalc(vi = lnRR_var,
             cluster = Cohort_ID,
             obs = Obs_ID,
             rho = 0.5,
             data = dat)
```
### Final dataset for meta-analysis
I cannot attach a caption to a `datatable()` table. Do I need to use a `kable()` table instead?
```{r}
#| label: dataset
#| tbl-cap: "Our datasets included predator and prey datasets"
datatable (dat, caption = "Dataset for meta-analysis" ,
options = list (pageLength = 10 , scrollX = TRUE ))
```
**Table 2 - Dataset for meta-analysis**
### Check phylogenetic relatedness
If phylogeny does not explain much of the heterogeneity, we will not need to consider it and the two datasets can be merged.
We used meta-analytical models to calculate the total I2 (a measure of heterogeneity not caused by sampling error; Higgins et al., 2003) and the partial I2 explained by each random factor.
Based on this, we do not need to consider phylogenetic relatedness in our meta-regressions, as this random factor did not explain much of the heterogeneity between effect sizes (partial I2 < 0.001%); we can therefore combine the two datasets (predator and prey) for the meta-analysis.
```{r}
#| eval: false
# Fit the full multilevel model on the predator dataset, including phylogeny
# as a random effect, given one candidate phylogenetic correlation matrix.
# NOTE(review): the default `cor_tree = vcv_tree` refers to an object that is
# never defined (only `vcv_tree_50` exists below). Harmless here because the
# function is always called with an argument, but the default should go.
phy_model <- function (cor_tree = vcv_tree){
model <- rma.mv (yi = lnRR,
V = VCV_pred,
random = list (~ 1 | Study_ID,
~ 1 | Shared_control_ID,
~ 1 | Cohort_ID,
~ 1 | Obs_ID,
~ 1 | Bird_species,
~ 1 | Phylogeny),
R = list (Phylogeny = cor_tree),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat_pred)
model
}
# Use the first 50 trees and convert each to a phylogenetic correlation matrix.
tree_50 <- trees[1 : 50 ]
vcv_tree_50 <- map (tree_50, ~ vcv (.x, corr = TRUE ))
# Run multiple meta-analyses with 50 different trees and obtain the combined result
# NOTE(review): mc.cores = 8 assumes a fork-capable OS (mclapply runs
# sequentially on Windows) — confirm the intended platform.
ma_50 <- mclapply (vcv_tree_50, phy_model, mc.cores = 8 )
```
```{r}
#| results: hide
# Load the 50 precomputed model fits (the chunk above has eval: false).
ma_50 <- readRDS (here ("Rdata" , "ma_50.RDS" ))
# combining the results: pooled intercept estimates and standard errors
# across trees, treated like multiple imputations.
est_50 <- map_dbl (ma_50, ~ .x$ b[[1 ]])
se_50 <- map_dbl (ma_50, ~ .x$ se)
# Interleave (estimate, se) pairs in the order pool.mi expects.
df_50 <- c (rbind (est_50, se_50))
# creating an array required by pool.mi: 1 parameter x 2 columns x 50 fits
my_array <- array (df_50, dim = c (1 , 2 , 50 ))
com_res <- round (pool.mi (my_array), 4 )
```
```{r}
# Pooled fixed-effect result across the 50 trees.
knitr:: kable (com_res, caption = "Combined result of 50 phylogenetic trees" )
# Average the variance components (sigma^2) of each random effect over the
# 50 fits; each row of sigma2_mod is one fitted model.
sigma2_mod <- do.call (rbind, lapply (ma_50, function (x) x$ sigma2))
sigma2_mod <- data.frame (sigma2_mod)
colnames (sigma2_mod) <- c ("sigma^2.1_Study_ID" , "sigma^2.2_SharedControl_ID" ,
"sigma^2.3_Cohort_ID" , "sigma^2.4_Obs_ID" ,
"sigma^2.5_BirdSpecies" , "sigma^2.6_Phylogeny" )
sigma2_mod <- round (colMeans (sigma2_mod), 4 )
knitr:: kable (sigma2_mod, caption = "Average variance components" )
```
::: callout-note
## We got 1000 phylogenetic trees from [BirdTree.org](https://birdtree.org)
Only 50 trees are used as in [ Nakagawa & Villemereuil (2019) ](https://doi.org/10.1093/sysbio/syy089)
:::
:::
## Meta-analysis
We used meta-analytical models to calculate the total I2 (a measure of heterogeneity not caused by sampling error; Higgins et al., 2003) and the partial I2 explained by each random factor.
::: callout-note
## Which random effects remove in our models?
We can remove _Shared control ID_ and _Cohort ID_
:::
```{r}
# Overall (intercept-only) meta-analytic model on the combined dataset.
# NOTE(review): the original comment said Cohort_ID was excluded because
# sigma^2 = 0, but Cohort_ID is still in the random-effects list below —
# reconcile the comment and the model.
ma_all <- rma.mv (yi = lnRR,
V = VCV,
random = list (~ 1 | Study_ID,
~ 1 | Shared_control_ID,
~ 1 | Cohort_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (ma_all)
# Total and partial I^2 (heterogeneity not due to sampling error).
i2 <- round (i2_ml (ma_all), 4 )
```
```{r}
#| echo: false
# Display the I^2 values as a one-row table.
knitr::kable(t(i2))
```
**Table 3 - Heterogeneity of effect size**
```{r}
#| label: fig-ma_all
#| fig-cap: "Estimates of overall effect size and 95% confidence intervals"
#| fig-cap-location: bottom
# Orchard plot of the overall effect with Okabe-Ito (colour-blind safe) scales.
orchard_plot (ma_all,
group = "Study_ID" ,
xlab = "log response ratio (lnRR)" ,
angle = 45 ) +
scale_x_discrete (labels = c ("Overall effect" )) +
scale_colour_okabe_ito ()+
scale_fill_okabe_ito ()
```
## Meta-regressions: uni-moderator
:::{.panel-tabset}
### Pattern type
Eyespot vs. conspicuous patterns — is there a significant difference in effect size between the two pattern types?
```{r}
# Normal model: meta-regression with pattern type (eyespot vs. conspicuous)
# as the moderator; intercept = reference level of Treatment_stimulus.
mr_eyespot <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Treatment_stimulus,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_eyespot)
# Heterogeneity after accounting for the moderator.
i2_1 <- round (i2_ml (mr_eyespot), 4 )
```
```{r}
#| echo: false
# Display the moderator-model I^2 values as a one-row table.
knitr::kable(t(i2_1))
```
**Table XX. Heterogeneity of effect size**
```{r}
#| label: fig-eyespot
#| fig-cap: "Eyespot vs. conspicuous patterns"
# Orchard plot of effect sizes by pattern type.
orchard_plot (mr_eyespot,
mod = "Treatment_stimulus" ,
group = "Study_ID" ,
xlab = "log response ratio (lnRR)" ,
angle = 45 ) +
scale_colour_okabe_ito (order = 2 : 3 )+
scale_fill_okabe_ito (order = 2 : 3 )
```
```{r}
# intercept-removed model: gives each level's own mean estimate (and CI)
# instead of a contrast against a reference level.
mr_eyespot1 <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Treatment_stimulus - 1 ,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_eyespot1)
```
### Pattern diameter (mm)
```{r}
#| label: fig-diameter
#| fig-cap: "Effect size and pattern diameter"
# Meta-regression with log-transformed pattern diameter as the moderator.
mr_diameter <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_diameter,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_diameter)
# Bubble plot: point size reflects effect-size precision.
bubble_plot (mr_diameter,
mod = "Log_diameter" ,
group = "Study_ID" ,
xlab = "Log-transformed diameter" )
```
### Pattern area (mm²)
```{r}
#| label: fig-area
#| fig-cap: "Effect size and pattern area"
# Meta-regression with log-transformed pattern area as the moderator.
mr_area <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_area,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_area)
bubble_plot (mr_area,
mod = "Log_area" ,
group = "Study_ID" ,
xlab = "Log-transformed area" )
```
### Pattern number
```{r}
#| label: fig-number
#| fig-cap: "Effect size and number of patterns"
# Meta-regression with the number of patterns as the moderator.
# Note: this model also includes Shared_control_ID as a random effect,
# unlike the other uni-moderator models above.
mr_num <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Number_pattern,
random = list (~ 1 | Study_ID,
~ 1 | Shared_control_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_num)
bubble_plot (mr_num,
mod = "Number_pattern" ,
group = "Study_ID" ,
xlab = "Number of patterns" )
```
### Stimuli type
Our dataset includes two types of stimuli: live and artificial. Is there a significant difference in effect size between the two stimulus types?
```{r}
#| label: fig-stimuli
#| fig-cap: "Effect size and prey type - real vs. artificial"
# normal model: prey type (live vs. artificial) as the moderator.
mr_prey_type <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Type_prey,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_prey_type)
# Orchard plot of effect sizes by prey type.
orchard_plot (mr_prey_type,
mod = "Type_prey" ,
group = "Study_ID" ,
xlab = "Type of prey" ,
angle = 45 ) +
scale_colour_okabe_ito (order = 4 : 5 )+
scale_fill_okabe_ito (order = 4 : 5 )
# intercept-removed model: per-level mean estimates.
mr_prey_type1 <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Type_prey - 1 ,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_prey_type1)
```
### Prey shape type
Our dataset includes four prey shape types (e.g., real butterfly, abstract butterfly, and artificial prey). Is there a significant difference in effect size among prey shapes?
```{r}
#| label: fig-shape
#| fig-cap: "Effect size and prey shape"
# Intercept-removed meta-regression with prey shape as the moderator;
# includes Shared_control_ID as a random effect.
mr_prey_shape <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Shape_prey - 1 ,
random = list (~ 1 | Study_ID,
~ 1 | Shared_control_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_prey_shape)
orchard_plot (mr_prey_shape,
mod = "Shape_prey" ,
group = "Study_ID" ,
xlab = "Shape of prey" ,
angle = 45 ) +
scale_colour_okabe_ito (order = 6 : 9 )+
scale_fill_okabe_ito (order = 6 : 9 )
```
:::
## Correlation visualisation and choose moderators
Before we run multi-moderator meta-regressions, we need to consider the correlation between moderators.
Area, diameter and background seem to be correlated. We visualised the correlation between these variables.
:::{.panel-tabset}
### Visualisation of correlation
```{r}
#| label: fig-cor-continuous
#| fig-cap: "Correlation between continuous moderators"
# Pairwise Pearson correlations among the continuous moderators, on the raw
# and the log-transformed scales, shown side by side.
corr_cont <- round(cor(dat[, c("Diameter_pattern", "Area_pattern",
                               "Number_pattern", "Area_background")]), 2)
p1 <- ggcorrplot(corr_cont, hc.order = TRUE, lab = TRUE,
                 outline.col = "white",
                 colors = c("#6D9EC1", "white", "#E46726"),
                 title = "(a) Continuous variables")
corr_cont_log <- round(cor(dat[, c("Log_diameter", "Log_area",
                                   "Number_pattern", "Log_background")]), 2)
p2 <- ggcorrplot(corr_cont_log, hc.order = TRUE, lab = TRUE,
                 outline.col = "white",
                 colors = c("#6D9EC1", "white", "#E46726"),
                 # fixed typo: was "Log-transormed"
                 title = "(b) Log-transformed continuous variables")
p1 + p2 + plot_layout(guides = 'collect')
```
```{r}
#| label: fig-cor-categorical
#| fig-cap: "Correlation between categorical variables"
# Association matrix for the categorical moderators only.
dat1 <- dat %>%
select (Treatment_stimulus, Type_prey, Shape_prey)
assocMatrix (dat1)
```
ASK: Why is the y-axis label shown as "NA"?
### Choose moderators
We used model R2 values to find the better model, and moderator VIF values to check for multicollinearity between moderators.
A higher R2 indicates a better model, and VIF > 2 indicates multicollinearity.
#### R2
```{r}
# Compare area vs. diameter as the size moderator (each alongside pattern
# number) using marginal/conditional R^2.
r2_area <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_area + Number_pattern,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
r2_diameter <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_diameter + Number_pattern,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
# check r2 values
r2_ml (r2_area)
r2_ml (r2_diameter)
```
It seems **area** is a better predictor than **diameter** .
#### VIF
```{r}
# Full candidate model with all categorical and continuous moderators,
# used only to screen for multicollinearity via VIF.
vif_1 <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_area + Number_pattern +
Treatment_stimulus + Type_prey + Shape_prey,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
# we get Warning message:
# Redundant predictors dropped from the model.
vif.rma (vif_1)
```
Shape_prey has high VIF value. We removed it from the model and run new model.
```{r}
# Re-screen VIF after dropping Shape_prey (high VIF in the model above).
vif_2 <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_area + Number_pattern +
Treatment_stimulus + Type_prey,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
vif.rma (vif_2)
```
It looks good. We will use this model.
:::
## Meta-regressions: multi-moderator
:::{.panel-tabset}
### All potential moderators (full model)
Include all moderators in the model.
```{r}
# Full multi-moderator model (all moderators retained after the VIF screen).
mr_all <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Treatment_stimulus + Log_area
+ Number_pattern + Type_prey,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_all)
# Variance explained by the moderators.
r2_ml (mr_all)
```
### Moderators associated with conspicuousness
Only include moderators related to conspicuousness (pattern area and number of patterns) in the model.
```{r}
# Conspicuousness-only model: pattern area and pattern number.
mr_cons <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_area + Number_pattern,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_cons)
```
```{r}
# Conspicuousness model plus pattern type (Treatment_stimulus).
mr_cons_1 <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Log_area + Number_pattern + Treatment_stimulus,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_cons_1)
```
### Moderators associated with prey
```{r}
# Prey-related multi-moderator model: stimulus treatment and prey type.
mr_pre <- rma.mv(yi = lnRR,
                 V = VCV,
                 mods = ~ Treatment_stimulus + Type_prey,
                 random = list(~ 1 | Study_ID,
                               ~ 1 | Obs_ID),
                 test = "t",
                 method = "REML",
                 sparse = TRUE,
                 data = dat)
# Fixed: the model was fitted but never displayed, unlike every sibling tab.
summary(mr_pre)
```
:::
## Publication bias
ASK: Is this correct?
Also, how do I number sub-captions correctly?
:::{.panel-tabset}
### Funnel plot
```{r}
#| layout-ncol: 2
#| label: fig-funnel
#| fig-cap: "Funnel plot of effect size and its standard error"
#| fig-subcap:
#| - "Effect size and its standard error"
#| - "Effect size and its inverse standard error"
# Funnel plot against the standard error.
funnel(ma_all, yaxis = "sei",
       xlab = "Standardized residuals",
       # fixed: label said "inverse of SE" but yaxis = "sei" plots the SE itself
       ylab = "Standard error (SE)")
# Funnel plot against precision (inverse of the standard error).
f2 <- funnel(ma_all, yaxis = "seinv",
             xlab = "Standardized residuals",
             ylab = "Precision (inverse of SE)",
             col = c(alpha("orange", 0.5)))
# Same plot with outliers trimmed (precision capped at 80) — easier to read.
f3 <- funnel(ma_all, yaxis = "seinv",
             xlab = "Standardized residuals",
             ylab = "Precision (inverse of SE)",
             xlim = c(-4, 4),
             ylim = c(0.8, 80),
             col = c(alpha("mediumvioletred", 0.5)))
```
### Egger's test
```{r}
#| label: fig-egger
#| fig-cap: "Egger's test of publication bias"
# Egger-style regression: effect size vs. sqrt of the inverse effective
# sample size; a non-zero slope suggests small-study (publication) bias.
df_bias <- dat %>% mutate (sqrt_inv_e_n = sqrt ((Cn + Tn)/ (Cn * Tn)))
# NOTE(review): this model uses V = lnRR_var (a vector) rather than the VCV
# matrix used elsewhere — confirm this is intentional for the bias test.
bias_model <- rma.mv (yi = lnRR,
V = lnRR_var,
mods = ~ 1 + sqrt_inv_e_n,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = df_bias)
summary (bias_model)
bubble_plot (bias_model,
mod = "sqrt_inv_e_n" ,
group = "Study_ID" ,
xlab = "Square root of inverse of effective sample size" )
```
### Decline effect
```{r}
#| label: fig-decline
#| fig-cap: "Decline effect"
# Decline-effect test: does the effect size shrink with publication year?
year_model <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ 1 + Year,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = df_bias)
summary (year_model)
bubble_plot (year_model,
mod = "Year" ,
group = "Study_ID" ,
xlab = "Year of publication" )
```
:::
## Additional analyses
:::{.panel-tabset}
### Dataset
We compared effect size between two datasets (predator and prey) to see whether there is any difference in effect size between two datasets.
```{r}
#| label: fig-dataset
#| fig-cap: "Effect size and dataset - predator and prey"
# Sensitivity check: does the effect size differ between the predator and
# prey datasets?
mr_dataset <- rma.mv (yi = lnRR,
V = VCV,
mods = ~ Dataset,
random = list (~ 1 | Study_ID,
~ 1 | Obs_ID),
test = "t" ,
method = "REML" ,
sparse = TRUE ,
data = dat)
summary (mr_dataset)
orchard_plot (mr_dataset,
mod = "Dataset" ,
group = "Study_ID" ,
xlab = "Dataset" ,
angle = 45 )
```
### Background area (mm²)
```{r}
#| label: fig-background
#| fig-cap: "Effect size and background area"
# Meta-regression with log-transformed background area as the moderator.
mr_background <- rma.mv(yi = lnRR,
                        V = VCV,
                        mods = ~ Log_background,
                        random = list(~ 1 | Study_ID,
                                      ~ 1 | Obs_ID),
                        test = "t",
                        method = "REML",
                        sparse = TRUE,
                        data = dat)
summary(mr_background)
bubble_plot(mr_background,
            mod = "Log_background",
            group = "Study_ID",
            xlab = "Log-transformed background")  # fixed typo: was "backgroud"
```
:::
## Figure gallery
:::{.panel-tabset}
### Discrete moderators
```{r}
#| label: fig-galary1
#| fig-cap: "Discrete moderators"
#| include: false
#|
# Publication figure 1: overall effect (p1) plus the two discrete-moderator
# orchard plots (p2, p3), composed with patchwork and saved as fig1.pdf.
p1 <- orchard_plot (ma_all,
group = "Study_ID" ,
xlab = "log response ratio (lnRR)" ,
angle = 45 ,
legend.pos = "top.left" ) +
scale_x_discrete (labels = c ("Overall effect" )) +
theme (legend.direction = "vertical" ,
legend.text = element_text (size = 6 )) +
scale_colour_okabe_ito ()+
scale_fill_okabe_ito ()
p1
p2 <- orchard_plot (mr_eyespot,
mod = "Treatment_stimulus" ,
group = "Study_ID" ,
xlab = "log response ratio (lnRR)" ,
angle = 45 ,
legend.pos = "top.left" ) +
theme (legend.direction = "vertical" ,
legend.text = element_text (size = 6 )) +
scale_colour_okabe_ito (order = 2 : 3 )+
scale_fill_okabe_ito (order = 2 : 3 )
p3 <- orchard_plot (mr_prey_type,
mod = "Type_prey" ,
group = "Study_ID" ,
xlab = "log response ratio (lnRR)" ,
angle = 45 ,
legend.pos = "top.left" ) +
theme (legend.direction = "vertical" ,
legend.text = element_text (size = 6 ))+
scale_colour_okabe_ito (order = 4 : 5 ) +
scale_fill_okabe_ito (order = 4 : 5 )
# Layout: p1 on the left, p2 over p3 on the right; panels tagged a, b, c.
patch1 <- p1 | (p2 / p3)
patch1 + plot_annotation (tag_levels = "a" )
# ggsave writes the most recently displayed plot.
ggsave ("fig1.pdf" , width = 10 , height = 5 , dpi = 450 )
```

### Continuous moderators
```{r}
#| label: fig-galary2
#| fig-cap: "Continuous moderators"
#| include: false
# Publication figure 2: the two continuous-moderator bubble plots stacked
# vertically and saved as fig2.pdf.
p4 <- bubble_plot (mr_area,
mod = "Log_area" ,
group = "Study_ID" ,
xlab = "Log-transformed area" ) +
scale_x_continuous (breaks = seq (0 ,7 ,1.5 )) +
scale_y_continuous (breaks = seq (- 2.5 , 4.5 , 1.5 ))
p5 <- bubble_plot (mr_num,
mod = "Number_pattern" ,
group = "Study_ID" ,
xlab = "Number of patterns" ) +
scale_x_continuous (breaks = seq (0 ,11 ,1.5 )) +
scale_y_continuous (breaks = seq (- 2.5 , 4.5 , 1.5 ))
p4 / p5 + plot_annotation (tag_levels = "a" )
ggsave ("fig2.pdf" , width = 10 , height = 10 , dpi = 450 )
```

### Publication bias
I got an error message when I used the `ggdraw()` function. I need to ask about this later.
```{r}
#| label: fig-galary3
#| fig-cap: "Publication bias"
#| fig-subcap:
#| - "Funnel plot"
#| - "Egger's test and Decline effect"
#| layout-ncol: 2
#| include: false
# Publication-bias figure: trimmed funnel plot (base graphics) plus the
# Egger and decline-effect bubble plots saved as fig4.pdf.
funnel(ma_all, yaxis = "seinv",
       xlab = "Standardized residuals",  # fixed typo: was "Standarized"
       ylab = "Precision (inverse of SE)",
       xlim = c(-3.0, 4.5),
       ylim = c(0.8, 80),
       col = c(alpha("mediumvioletred", 0.5)),
       cex = 0.7)
p7 <- bubble_plot(bias_model,
                  mod = "sqrt_inv_e_n",
                  group = "Study_ID",
                  xlab = "Square root of inverse of effective sample size") +
  scale_y_continuous(breaks = seq(-2.5, 4.5, 1.5))
p8 <- bubble_plot(year_model,
                  mod = "Year",
                  group = "Study_ID",
                  xlab = "Year of publication") +
  scale_y_continuous(breaks = seq(-2.5, 4.0, 1.5))
pub_b <- p7 / p8
pub_b + plot_annotation(tag_levels = 'a')
ggsave("fig4.pdf", width = 10, height = 10, dpi = 450)
```
::: {layout-ncol=2}


Publication bias
:::
:::
## R session information
```{r}
#| echo: false
sessionInfo () %>% pander:: pander ()
```